{
struct sh_emulate_ctxt *sh_ctxt =
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
- unsigned int insn_off = offset - ctxt->regs->eip;
+ unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;
/* Fall back if requested bytes are not in the prefetch cache. */
if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
}
/* Attempt to prefetch whole instruction. */
+ sh_ctxt->insn_buf_eip = regs->eip;
sh_ctxt->insn_buf_bytes =
(!hvm_translate_linear_addr(
x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
return &hvm_shadow_emulator_ops;
}
+/* Update an initialized emulation context to prepare for the next
+ * instruction */
+/* Called between back-to-back x86_emulate() invocations on the same vcpu:
+ * refreshes the cached instruction-fetch buffer when the new %eip has moved
+ * outside the bytes prefetched for the previous instruction.  On fetch or
+ * translation failure insn_buf_bytes is set to 0 so the next fetch falls
+ * back to the slow path. */
+void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
+                               struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    unsigned long addr, diff;
+
+    /* We don't refetch the segment bases, because we don't emulate
+     * writes to segment registers */
+
+    if ( is_hvm_vcpu(v) )
+    {
+        /* Unsigned distance from the start of the cached fetch.  If %eip
+         * moved backwards this wraps to a huge value, so it also exceeds
+         * insn_buf_bytes and forces a refetch. */
+        diff = regs->eip - sh_ctxt->insn_buf_eip;
+        if ( diff > sh_ctxt->insn_buf_bytes )
+        {
+            /* Prefetch more bytes. */
+            sh_ctxt->insn_buf_bytes =
+                (!hvm_translate_linear_addr(
+                    x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+                    hvm_access_insn_fetch, sh_ctxt, &addr) &&
+                 !hvm_copy_from_guest_virt(
+                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+                ? sizeof(sh_ctxt->insn_buf) : 0;
+            /* Record where the cached bytes start so subsequent fetches
+             * can index into insn_buf relative to this %eip. */
+            sh_ctxt->insn_buf_eip = regs->eip;
+        }
+    }
+}
+
/**************************************************************************/
/* Code for "promoting" a guest page to the point where the shadow code is
* willing to let it be treated as a guest page table. This generally
if ( !shadow_mode_refcounts(d) || !guest_mode(regs) )
goto not_a_shadow_fault;
+ /*
+ * We do not emulate user writes. Instead we use them as a hint that the
+ * page is no longer a page table. This behaviour differs from native, but
+ * it seems very unlikely that any OS grants user access to page tables.
+ */
+ if ( (regs->error_code & PFEC_user_mode) )
+ {
+ SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
+ mfn_x(gmfn));
+ perfc_incr(shadow_fault_emulate_failed);
+ sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
+ goto done;
+ }
+
if ( is_hvm_domain(d) )
{
/*
emul_ops = shadow_init_emulation(&emul_ctxt, regs);
- /*
- * We do not emulate user writes. Instead we use them as a hint that the
- * page is no longer a page table. This behaviour differs from native, but
- * it seems very unlikely that any OS grants user access to page tables.
- */
- r = X86EMUL_UNHANDLEABLE;
- if ( !(regs->error_code & PFEC_user_mode) )
- r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+ r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
/*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
}
+#if GUEST_PAGING_LEVELS == 3 /* PAE guest */
+ if ( r == X86EMUL_OKAY ) {
+ int i;
+ /* Emulate up to four extra instructions in the hope of catching
+ * the "second half" of a 64-bit pagetable write. */
+ for ( i = 0 ; i < 4 ; i++ )
+ {
+ shadow_continue_emulation(&emul_ctxt, regs);
+ v->arch.paging.last_write_was_pt = 0;
+ r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+ if ( r == X86EMUL_OKAY )
+ {
+ if ( v->arch.paging.last_write_was_pt )
+ {
+ perfc_incr(shadow_em_ex_pt);
+ break; /* Don't emulate past the other half of the write */
+ }
+ else
+ perfc_incr(shadow_em_ex_non_pt);
+ }
+ else
+ {
+ perfc_incr(shadow_em_ex_fail);
+ break; /* Don't emulate again if we failed! */
+ }
+ }
+ }
+#endif /* PAE guest */
+
/* Emulator has changed the user registers: write back */
if ( is_hvm_domain(d) )
hvm_load_cpu_guest_regs(v, regs);
gfn_t gfn;
mfn_t mfn;
+ /* We don't emulate user-mode writes to page tables */
+ if ( ring_3(sh_ctxt->ctxt.regs) )
+ return NULL;
+
+ /* Walk the guest pagetables */
guest_walk_tables(v, vaddr, &gw, 1);
flags = accumulate_guest_flags(v, &gw);
gfn = guest_l1e_get_gfn(gw.eff_l1e);
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
- if ( !(flags & _PAGE_PRESENT) )
- {
- errcode = 0;
+ errcode = PFEC_write_access;
+ if ( !(flags & _PAGE_PRESENT) )
goto page_fault;
- }
- if ( !(flags & _PAGE_RW) ||
- (!(flags & _PAGE_USER) && ring_3(sh_ctxt->ctxt.regs)) )
- {
- errcode = PFEC_page_present;
+ errcode |= PFEC_page_present;
+ if ( !(flags & _PAGE_RW) )
goto page_fault;
- }
- if ( !mfn_valid(mfn) )
+ if ( mfn_valid(mfn) )
+ {
+ *mfnp = mfn;
+ v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
+ return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
+ }
+ else
return NULL;
- *mfnp = mfn;
- return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
-
page_fault:
- errcode |= PFEC_write_access;
if ( is_hvm_vcpu(v) )
hvm_inject_exception(TRAP_page_fault, errcode, vaddr);
else